vpsr.val = VCPU(vcpu, vpsr);
vcpu_get_rr(vcpu, vadr, &vrr.rrval);
- vmx_vcpu_get_pta(vcpu,&vpta.val);
+ vpta.val = vmx_vcpu_get_pta(vcpu);
if ( vrr.ve & vpta.ve ) {
switch ( ref ) {
}
-IA64FAULT vmx_vcpu_thash(VCPU *vcpu, u64 vadr, u64 *pval)
+u64 vmx_vcpu_thash(VCPU *vcpu, u64 vadr)
{
PTA vpta;
ia64_rr vrr;
+ u64 pval;
u64 vhpt_offset;
- vmx_vcpu_get_pta(vcpu, &vpta.val);
+ vpta.val = vmx_vcpu_get_pta(vcpu);
vcpu_get_rr(vcpu, vadr, &vrr.rrval);
if(vpta.vf){
- *pval = ia64_call_vsa(PAL_VPS_THASH,vadr,vrr.rrval,vpta.val,0,0,0,0);
- *pval = vpta.val & ~0xffff;
+ pval = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr.rrval,
+ vpta.val, 0, 0, 0, 0);
+ pval = vpta.val & ~0xffff;
}else{
vhpt_offset=((vadr>>vrr.ps)<<3)&((1UL<<(vpta.size))-1);
- *pval = (vadr&VRN_MASK)|
+ pval = (vadr & VRN_MASK) |
(vpta.val<<3>>(vpta.size+3)<<(vpta.size))|
vhpt_offset;
}
- return IA64_NO_FAULT;
+ return pval;
}
-IA64FAULT vmx_vcpu_ttag(VCPU *vcpu, u64 vadr, u64 *pval)
+u64 vmx_vcpu_ttag(VCPU *vcpu, u64 vadr)
{
ia64_rr vrr;
PTA vpta;
- vmx_vcpu_get_pta(vcpu, &vpta.val);
+ u64 pval;
+ vpta.val = vmx_vcpu_get_pta(vcpu);
vcpu_get_rr(vcpu, vadr, &vrr.rrval);
if(vpta.vf){
- *pval = ia64_call_vsa(PAL_VPS_TTAG,vadr,vrr.rrval,0,0,0,0,0);
+ pval = ia64_call_vsa(PAL_VPS_TTAG, vadr, vrr.rrval, 0, 0, 0, 0, 0);
}else{
- *pval = 1;
+ pval = 1;
}
- return IA64_NO_FAULT;
+ return pval;
}
}
}
else{
- vmx_vcpu_thash(vcpu, vadr, &vhpt_adr);
+ vhpt_adr = vmx_vcpu_thash(vcpu, vadr);
data = vtlb_lookup(vcpu, vhpt_adr, DSIDE_TLB);
if(data){
if(vpsr.ic){
}
}
-IA64FAULT vmx_vcpu_tak(VCPU *vcpu, u64 vadr, u64 *key)
+u64 vmx_vcpu_tak(VCPU *vcpu, u64 vadr)
{
thash_data_t *data;
PTA vpta;
- vmx_vcpu_get_pta(vcpu, &vpta.val);
+ u64 key;
+ vpta.val = vmx_vcpu_get_pta(vcpu);
if(vpta.vf==0 || unimplemented_gva(vcpu, vadr)){
- *key=1;
- return IA64_NO_FAULT;
+ key=1;
+ return key;
}
data = vtlb_lookup(vcpu, vadr, DSIDE_TLB);
if(!data||!data->p){
- *key=1;
+ key = 1;
}else{
- *key=data->key;
+ key = data->key;
}
- return IA64_NO_FAULT;
+ return key;
}
return IA64_NO_FAULT;
}
#endif //CHECK_FAULT
- vmx_vcpu_thash(vcpu, r3, &r1);
+ r1 = vmx_vcpu_thash(vcpu, r3);
vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
return(IA64_NO_FAULT);
}
return IA64_NO_FAULT;
}
#endif //CHECK_FAULT
- vmx_vcpu_ttag(vcpu, r3, &r1);
+ r1 = vmx_vcpu_ttag(vcpu, r3);
vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
return(IA64_NO_FAULT);
}
return IA64_FAULT;
#endif
}
- if(vmx_vcpu_tak(vcpu, r3, &r1)){
- return IA64_FAULT;
- }
+ r1 = vmx_vcpu_tak(vcpu, r3);
vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
return(IA64_NO_FAULT);
}
return IA64_FAULT;
}
#endif // CHECK_FAULT
- vmx_vcpu_get_itc(vcpu,&r1);
+ r1 = vmx_vcpu_get_itc(vcpu);
vcpu_set_gr(vcpu,inst.M31.r1,r1,0);
return IA64_NO_FAULT;
}
return IA64_FAULT;
}
#endif //CHECK_FAULT
- vmx_vcpu_get_pkr(vcpu,r3,&r1);
+ r1 = vmx_vcpu_get_pkr(vcpu, r3);
return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
}
return IA64_FAULT;
}
#endif //CHECK_FAULT
- vmx_vcpu_get_dbr(vcpu,r3,&r1);
+ r1 = vmx_vcpu_get_dbr(vcpu, r3);
return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
}
return IA64_FAULT;
}
#endif //CHECK_FAULT
- vmx_vcpu_get_ibr(vcpu,r3,&r1);
+ r1 = vmx_vcpu_get_ibr(vcpu, r3);
return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
}
return IA64_FAULT;
}
#endif //CHECK_FAULT
- vmx_vcpu_get_pmc(vcpu,r3,&r1);
+ r1 = vmx_vcpu_get_pmc(vcpu, r3);
return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
}
return IA64_FAULT;
}
#endif //CHECK_FAULT
- vmx_vcpu_get_cpuid(vcpu,r3,&r1);
+ r1 = vmx_vcpu_get_cpuid(vcpu, r3);
return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
}
((fault=vcpu_get_##cr(vcpu,&val))==IA64_NO_FAULT)?\
vcpu_set_gr(vcpu, tgt, val,0):fault;
+//#define cr_get(cr) (vcpu_set_gr(vcpu, tgt, vcpu_get##cr(vcpu), 0)
+
+/*
#define vmx_cr_get(cr) \
((fault=vmx_vcpu_get_##cr(vcpu,&val))==IA64_NO_FAULT)?\
vcpu_set_gr(vcpu, tgt, val,0):fault;
+*/
+
+#define vmx_cr_get(cr) (vcpu_set_gr(vcpu, tgt, vmx_vcpu_get_##cr(vcpu), 0))
static IA64FAULT vmx_emul_mov_from_cr(VCPU *vcpu, INST64 inst)
{
case 25:return cr_get(iha);
case 64:return vmx_cr_get(lid);
case 65:
- vmx_vcpu_get_ivr(vcpu,&val);
+ val = vmx_vcpu_get_ivr(vcpu);
return vcpu_set_gr(vcpu,tgt,val,0);
case 66:return vmx_cr_get(tpr);
case 67:return vcpu_set_gr(vcpu,tgt,0L,0);
extern void vmx_vcpu_set_psr_sync_mpsr(VCPU * vcpu, u64 value);
extern IA64FAULT vmx_vcpu_cover(VCPU * vcpu);
extern IA64FAULT vmx_vcpu_set_rr(VCPU * vcpu, u64 reg, u64 val);
-extern IA64FAULT vmx_vcpu_get_pkr(VCPU * vcpu, u64 reg, u64 * pval);
+extern u64 vmx_vcpu_get_pkr(VCPU * vcpu, u64 reg);
IA64FAULT vmx_vcpu_set_pkr(VCPU * vcpu, u64 reg, u64 val);
extern IA64FAULT vmx_vcpu_itc_i(VCPU * vcpu, u64 pte, u64 itir, u64 ifa);
extern IA64FAULT vmx_vcpu_itc_d(VCPU * vcpu, u64 pte, u64 itir, u64 ifa);
extern IA64FAULT vmx_vcpu_ptc_e(VCPU * vcpu, u64 vadr);
extern IA64FAULT vmx_vcpu_ptc_g(VCPU * vcpu, u64 vadr, u64 ps);
extern IA64FAULT vmx_vcpu_ptc_ga(VCPU * vcpu, u64 vadr, u64 ps);
-extern IA64FAULT vmx_vcpu_thash(VCPU * vcpu, u64 vadr, u64 * pval);
+extern u64 vmx_vcpu_thash(VCPU * vcpu, u64 vadr);
extern u64 vmx_vcpu_get_itir_on_fault(VCPU * vcpu, u64 ifa);
-extern IA64FAULT vmx_vcpu_ttag(VCPU * vcpu, u64 vadr, u64 * pval);
+extern u64 vmx_vcpu_ttag(VCPU * vcpu, u64 vadr);
extern IA64FAULT vmx_vcpu_tpa(VCPU * vcpu, u64 vadr, u64 * padr);
-extern IA64FAULT vmx_vcpu_tak(VCPU * vcpu, u64 vadr, u64 * key);
+extern u64 vmx_vcpu_tak(VCPU * vcpu, u64 vadr);
extern IA64FAULT vmx_vcpu_rfi(VCPU * vcpu);
extern u64 vmx_vcpu_get_psr(VCPU * vcpu);
extern IA64FAULT vmx_vcpu_get_bgr(VCPU * vcpu, unsigned int reg, u64 * val);
VCPU control register access routines
**************************************************************************/
-static inline IA64FAULT vmx_vcpu_get_itm(VCPU * vcpu, u64 * pval)
+static inline u64 vmx_vcpu_get_itm(VCPU * vcpu)
{
- *pval = VCPU(vcpu, itm);
- return IA64_NO_FAULT;
+ return ((u64)VCPU(vcpu, itm));
}
-static inline IA64FAULT vmx_vcpu_get_iva(VCPU * vcpu, u64 * pval)
+static inline u64 vmx_vcpu_get_iva(VCPU * vcpu)
{
- *pval = VCPU(vcpu, iva);
- return IA64_NO_FAULT;
+ return ((u64)VCPU(vcpu, iva));
}
-static inline IA64FAULT vmx_vcpu_get_pta(VCPU * vcpu, u64 * pval)
+static inline u64 vmx_vcpu_get_pta(VCPU * vcpu)
{
- *pval = VCPU(vcpu, pta);
- return IA64_NO_FAULT;
+ return ((u64)VCPU(vcpu, pta));
}
-static inline IA64FAULT vmx_vcpu_get_lid(VCPU * vcpu, u64 * pval)
+static inline u64 vmx_vcpu_get_lid(VCPU * vcpu)
{
- *pval = VCPU(vcpu, lid);
- return IA64_NO_FAULT;
+ return ((u64)VCPU(vcpu, lid));
}
-static inline IA64FAULT vmx_vcpu_get_ivr(VCPU * vcpu, u64 * pval)
+static inline u64 vmx_vcpu_get_ivr(VCPU * vcpu)
{
- *pval = guest_read_vivr(vcpu);
- return IA64_NO_FAULT;
+ return ((u64)guest_read_vivr(vcpu));
}
-static inline IA64FAULT vmx_vcpu_get_tpr(VCPU * vcpu, u64 * pval)
+static inline u64 vmx_vcpu_get_tpr(VCPU * vcpu)
{
- *pval = VCPU(vcpu, tpr);
- return IA64_NO_FAULT;
+ return ((u64)VCPU(vcpu, tpr));
}
-static inline IA64FAULT vmx_vcpu_get_eoi(VCPU * vcpu, u64 * pval)
+static inline u64 vmx_vcpu_get_eoi(VCPU * vcpu)
{
- *pval = 0L; // reads of eoi always return 0
- return IA64_NO_FAULT;
+ return (0UL); // reads of eoi always return 0
}
-static inline IA64FAULT vmx_vcpu_get_irr0(VCPU * vcpu, u64 * pval)
+static inline u64 vmx_vcpu_get_irr0(VCPU * vcpu)
{
- *pval = VCPU(vcpu, irr[0]);
- return IA64_NO_FAULT;
+ return ((u64)VCPU(vcpu, irr[0]));
}
-static inline IA64FAULT vmx_vcpu_get_irr1(VCPU * vcpu, u64 * pval)
+static inline u64 vmx_vcpu_get_irr1(VCPU * vcpu)
{
- *pval = VCPU(vcpu, irr[1]);
- return IA64_NO_FAULT;
+ return ((u64)VCPU(vcpu, irr[1]));
}
-static inline IA64FAULT vmx_vcpu_get_irr2(VCPU * vcpu, u64 * pval)
+static inline u64 vmx_vcpu_get_irr2(VCPU * vcpu)
{
- *pval = VCPU(vcpu, irr[2]);
- return IA64_NO_FAULT;
+ return ((u64)VCPU(vcpu, irr[2]));
}
-static inline IA64FAULT vmx_vcpu_get_irr3(VCPU * vcpu, u64 * pval)
+static inline u64 vmx_vcpu_get_irr3(VCPU * vcpu)
{
- *pval = VCPU(vcpu, irr[3]);
- return IA64_NO_FAULT;
+ return ((u64)VCPU(vcpu, irr[3]));
}
-static inline IA64FAULT vmx_vcpu_get_itv(VCPU * vcpu, u64 * pval)
+static inline u64 vmx_vcpu_get_itv(VCPU * vcpu)
{
- *pval = VCPU(vcpu, itv);
- return IA64_NO_FAULT;
+ return ((u64)VCPU(vcpu, itv));
}
-static inline IA64FAULT vmx_vcpu_get_pmv(VCPU * vcpu, u64 * pval)
+static inline u64 vmx_vcpu_get_pmv(VCPU * vcpu)
{
- *pval = VCPU(vcpu, pmv);
- return IA64_NO_FAULT;
+ return ((u64)VCPU(vcpu, pmv));
}
-static inline IA64FAULT vmx_vcpu_get_cmcv(VCPU * vcpu, u64 * pval)
+static inline u64 vmx_vcpu_get_cmcv(VCPU * vcpu)
{
- *pval = VCPU(vcpu, cmcv);
- return IA64_NO_FAULT;
+ return ((u64)VCPU(vcpu, cmcv));
}
-static inline IA64FAULT vmx_vcpu_get_lrr0(VCPU * vcpu, u64 * pval)
+static inline u64 vmx_vcpu_get_lrr0(VCPU * vcpu)
{
- *pval = VCPU(vcpu, lrr0);
- return IA64_NO_FAULT;
+ return ((u64)VCPU(vcpu, lrr0));
}
-static inline IA64FAULT vmx_vcpu_get_lrr1(VCPU * vcpu, u64 * pval)
+static inline u64 vmx_vcpu_get_lrr1(VCPU * vcpu)
{
- *pval = VCPU(vcpu, lrr1);
- return IA64_NO_FAULT;
+ return ((u64)VCPU(vcpu, lrr1));
}
static inline IA64FAULT vmx_vcpu_set_itm(VCPU * vcpu, u64 val)
return IA64_NO_FAULT;
}
-static inline IA64FAULT vmx_vcpu_get_itc(VCPU * vcpu, u64 * val)
+static inline u64 vmx_vcpu_get_itc(VCPU * vcpu)
{
- *val = vtm_get_itc(vcpu);
- return IA64_NO_FAULT;
+ return ((u64)vtm_get_itc(vcpu));
}
/*
VCPU debug breakpoint register access routines
**************************************************************************/
-static inline IA64FAULT vmx_vcpu_get_cpuid(VCPU * vcpu, u64 reg, u64 * pval)
+static inline u64 vmx_vcpu_get_cpuid(VCPU * vcpu, u64 reg)
{
// TODO: unimplemented DBRs return a reserved register fault
// TODO: Should set Logical CPU state, not just physical
panic_domain(vcpu_regs(vcpu),
"there are only five cpuid registers");
}
- *pval = VCPU(vcpu, vcpuid[reg]);
- return IA64_NO_FAULT;
+ return ((u64)VCPU(vcpu, vcpuid[reg]));
}
static inline IA64FAULT vmx_vcpu_set_dbr(VCPU * vcpu, u64 reg, u64 val)
return IA64_NO_FAULT;
}
-static inline IA64FAULT vmx_vcpu_get_dbr(VCPU * vcpu, u64 reg, u64 * pval)
+static inline u64 vmx_vcpu_get_dbr(VCPU * vcpu, u64 reg)
{
// TODO: unimplemented DBRs return a reserved register fault
- u64 val = ia64_get_dbr(reg);
- *pval = val;
- return IA64_NO_FAULT;
+ return ((u64)ia64_get_dbr(reg));
}
-static inline IA64FAULT vmx_vcpu_get_ibr(VCPU * vcpu, u64 reg, u64 * pval)
+static inline u64 vmx_vcpu_get_ibr(VCPU * vcpu, u64 reg)
{
// TODO: unimplemented IBRs return a reserved register fault
- u64 val = ia64_get_ibr(reg);
- *pval = val;
- return IA64_NO_FAULT;
+ return ((u64)ia64_get_ibr(reg));
}
/**************************************************************************
return IA64_NO_FAULT;
}
-static inline IA64FAULT vmx_vcpu_get_pmc(VCPU * vcpu, u64 reg, u64 * pval)
+static inline u64 vmx_vcpu_get_pmc(VCPU * vcpu, u64 reg)
{
// NOTE: Reads from unimplemented PMC registers return zero
- u64 val = (u64) ia64_get_pmc(reg);
- *pval = val;
- return IA64_NO_FAULT;
+ return ((u64)ia64_get_pmc(reg));
}
-static inline IA64FAULT vmx_vcpu_get_pmd(VCPU * vcpu, u64 reg, u64 * pval)
+static inline u64 vmx_vcpu_get_pmd(VCPU * vcpu, u64 reg)
{
// NOTE: Reads from unimplemented PMD registers return zero
- u64 val = (u64) ia64_get_pmd(reg);
- *pval = val;
- return IA64_NO_FAULT;
+ return ((u64)ia64_get_pmd(reg));
}
/**************************************************************************